obj-y += of-devtree.o
obj-y += of-devwalk.o
obj-y += ofd_fixup.o
+obj-y += ofd_fixup_memory.o
obj-y += physdev.o
obj-y += rtas.o
obj-y += setup.o
}
htab_alloc(d, htab_order_pages);
+ INIT_LIST_HEAD(&d->arch.extent_list);
+
return 0;
}
void domain_relinquish_resources(struct domain *d)
{
free_domheap_pages(d->arch.rma_page, d->arch.rma_order);
+ free_extents(d);
}
void arch_dump_domain_info(struct domain *d)
#include <asm/papr.h>
#include "oftree.h"
+#define log2(x) ffz(~(x))
+
extern int parseelfimage_32(struct domain_setup_info *dsi);
extern int loadelfimage_32(struct domain_setup_info *dsi);
struct domain_setup_info dsi;
ulong dst;
u64 *ofh_tree;
+ uint rma_nrpages = 1 << d->arch.rma_order;
ulong rma_sz = rma_size(d->arch.rma_order);
ulong rma = page_to_maddr(d->arch.rma_page);
+ uint htab_order;
start_info_t *si;
ulong eomem;
int am64 = 1;
/* By default DOM0 is allocated all available memory. */
d->max_pages = ~0U;
+ /* default is the max(1/16th of memory, CONFIG_MIN_DOM0_PAGES) */
if (dom0_nrpages == 0) {
- dom0_nrpages = 1UL << d->arch.rma_order;
+ dom0_nrpages = total_pages >> 4;
+
+ if (dom0_nrpages < CONFIG_MIN_DOM0_PAGES)
+ dom0_nrpages = CONFIG_MIN_DOM0_PAGES;
}
+ /* make sure we are at least as big as the RMA */
+ if (dom0_nrpages < rma_nrpages)
+ dom0_nrpages = rma_nrpages;
+ else
+ dom0_nrpages = allocate_extents(d, dom0_nrpages, rma_nrpages);
+
d->tot_pages = dom0_nrpages;
ASSERT(d->tot_pages > 0);
+ htab_order = log2(d->tot_pages) - 6;
+ if (d->arch.htab.order > 0) {
+ /* we incorrectly allocate this too early so lets adjust if
+ * necessary */
+ printk("WARNING: htab allocated to early\n");
+ if (d->arch.htab.order < htab_order) {
+ printk("WARNING: htab reallocated for more memory: 0x%x\n",
+ htab_order);
+ htab_free(d);
+ htab_alloc(d, htab_order);
+ }
+ }
+
ASSERT( image_len < rma_sz );
si = (start_info_t *)(rma_addr(&d->arch, RMA_START_INFO) + rma);
printk("DOM: pc = 0x%lx, r2 = 0x%lx\n", pc, r2);
- ofd_dom0_fixup(d, *ofh_tree + rma, si, dst - rma);
+ ofd_dom0_fixup(d, *ofh_tree + rma, si);
set_bit(_VCPUF_initialised, &v->vcpu_flags);
}
}
-/* mark all memory from modules onward as unused, skipping hole(s),
- * and returning size of hole(s) */
+/* mark all memory from modules onward as unused */
static void heap_init(struct membuf *mb, uint entries)
{
int i;
return 0;
}
+/* Record an allocated extent of 2^order pages on @d's extent list.
+ * Returns the machine pfn of the extent's first page, or 0 if the
+ * tracking structure could not be allocated (in which case the caller
+ * still owns the pages and must free them).
+ * NOTE(review): a genuine pfn of 0 would be indistinguishable from
+ * failure -- presumably extents always lie above the RMA so pfn 0
+ * cannot occur here; confirm. */
+static uint add_extent(struct domain *d, struct page_info *pg, uint order)
+{
+    struct page_extents *pe;
+
+    pe = xmalloc(struct page_extents);
+    if (pe == NULL)
+        return 0;
+
+    pe->pg = pg;
+    pe->order = order;
+    pe->pfn = page_to_mfn(pg);
+
+    list_add_tail(&pe->pe_list, &d->arch.extent_list);
+
+    return pe->pfn;
+}
+
+/* Release the page_extents bookkeeping attached to @d.  Only the
+ * list nodes themselves are freed here; the extent pages are not.
+ * NOTE(review): xfree() is passed the embedded list_head rather than
+ * the containing struct page_extents -- correct only if pe_list is
+ * the structure's first member; confirm, or use list_entry() to
+ * recover the container explicitly. */
+void free_extents(struct domain *d)
+{
+    /* we just need to free the memory behind list */
+    struct list_head *list;
+    struct list_head *ent;
+    struct list_head *next;
+
+    list = &d->arch.extent_list;
+    ent = list->next;
+
+    /* save ->next before freeing the node we stand on */
+    while (ent != list) {
+        next = ent->next;
+        xfree(ent);
+        ent = next;
+    }
+}
+
+/* Grow @d's memory beyond its RMA towards a total of @nrpages pages,
+ * starting from the @rma_nrpages pages the RMA already provides.
+ * Memory is added in fixed chunks of 2^cpu_extent_order() pages, so
+ * the total may overshoot @nrpages (see comment below).
+ * Returns the page total actually reached; on allocation or
+ * bookkeeping failure this is whatever was reached so far, with all
+ * successfully added extents left on the list. */
+uint allocate_extents(struct domain *d, uint nrpages, uint rma_nrpages)
+{
+    uint ext_order;
+    uint ext_nrpages;
+    uint total_nrpages;
+    struct page_info *pg;
+
+    ext_order = cpu_extent_order();
+    ext_nrpages = 1 << ext_order;
+
+    total_nrpages = rma_nrpages;
+
+    /* We only allocate in nr_extsz chunks so if you are not divisible
+     * you get more than you asked for */
+    while (total_nrpages < nrpages) {
+        pg = alloc_domheap_pages(d, ext_order, 0);
+        if (pg == NULL)
+            return total_nrpages;
+
+        /* could not track the extent: give the pages back and stop */
+        if (add_extent(d, pg, ext_order) == 0) {
+            free_domheap_pages(pg, ext_order);
+            return total_nrpages;
+        }
+        total_nrpages += ext_nrpages;
+    }
+
+    return total_nrpages;
+}
+
int allocate_rma(struct domain *d, unsigned int order_pages)
{
ulong rma_base;
{
ulong rma_base_mfn = page_to_mfn(d->arch.rma_page);
ulong rma_size_mfn = 1UL << d->arch.rma_order;
+ struct page_extents *pe;
if (pfn < rma_size_mfn) {
if (type)
return pfn;
}
+ /* quick tests first */
+ list_for_each_entry (pe, &d->arch.extent_list, pe_list) {
+ uint end_pfn = pe->pfn + (1 << pe->order);
+
+ if (pfn >= pe->pfn && pfn < end_pfn) {
+ if (type)
+ *type = PFN_TYPE_LOGICAL;
+ return page_to_mfn(pe->pg) + (pfn - pe->pfn);
+ }
+ }
+
/* This hack allows dom0 to map all memory, necessary to
* initialize domU state. */
if (test_bit(_DOMF_privileged, &d->domain_flags)) {
* along with this program; if not, write to the Free Software
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
- * Copyright (C) IBM Corp. 2005
+ * Copyright (C) IBM Corp. 2005, 2006
*
* Authors: Jimi Xenidis <jimix@watson.ibm.com>
*/
}
#endif
-struct mem_reg {
- u64 addr;
- u64 sz;
-};
-
-static ofdn_t ofd_memory_chunk_create(void *m, ofdn_t p,
- const char *ppath,
- const char *name,
- const char *dt,
- ulong start, ulong size)
-{
- struct mem_reg reg;
- char path[128];
- ulong l;
- u32 v;
- ofdn_t n;
- ulong nl = strlen(name) + 1;
- ulong dtl = strlen(dt) + 1;
-
- l = snprintf(path, sizeof (path), "%s/%s@%lx", ppath, name, start);
- n = ofd_node_add(m, p, path, l + 1);
- ofd_prop_add(m, n, "name", name, nl);
-
- v = 1;
- ofd_prop_add(m, n, "#address-cells", &v, sizeof (v));
- v = 0;
- ofd_prop_add(m, n, "#size-cells", &v, sizeof (v));
-
- ofd_prop_add(m, n, "device_type", dt, dtl);
-
- /* physical addresses usable without regard to OF */
- reg.addr = start;
- reg.sz = size;
- ofd_prop_add(m, n, "reg", ®, sizeof (reg));
-
- return n;
-}
-
-static ofdn_t ofd_memory_props(void *m, struct domain *d, ulong eoload)
-{
- ofdn_t n = -1;
- ulong start = 0;
- static char name[] = "memory";
- ulong mem_size = rma_size(d->arch.rma_order);
- ulong chunk_size = rma_size(d->arch.rma_order);
-
- /* Remove all old memory props */
- do {
- ofdn_t old;
-
- old = ofd_node_find_by_prop(m, OFD_ROOT, "device_type",
- name, sizeof(name));
- if (old <= 0) break;
-
- ofd_node_prune(m, old);
- } while (1);
-
- while (start < mem_size) {
- ulong size = (mem_size < chunk_size) ? mem_size : chunk_size;
-
- n = ofd_memory_chunk_create(m, OFD_ROOT, "", "memory", "memory",
- start, size);
-
- if (start == 0) {
- /* We are processing the first and RMA chunk */
-
- /* free list of physical addresses available after OF and
- * client program have been accounted for */
- struct mem_reg avail[] = {
- /* 0 til OF @ 32MiB - 16KiB stack */
- { .addr = 0, .sz = ((32 << 20) - (16 << 10)) },
- /* end of loaded material to the end the chunk - 1 page */
- { .addr = eoload, .sz = chunk_size - eoload - PAGE_SIZE },
- /* the last page is reserved for xen_start_info */
- };
- ofd_prop_add(m, n, "available", &avail,
- sizeof (avail));
- }
-
- start += size;
- mem_size -= size;
- }
- return n;
-}
-
static ofdn_t ofd_xen_props(void *m, struct domain *d, start_info_t *si)
{
ofdn_t n;
return n;
}
-int ofd_dom0_fixup(struct domain *d, ulong mem, start_info_t *si, ulong eoload)
+int ofd_dom0_fixup(struct domain *d, ulong mem, start_info_t *si)
{
void *m;
const ofdn_t n = OFD_ROOT;
printk("Add /chosen props\n");
ofd_chosen_props(m, (char *)si->cmd_line);
- printk("fix /memory@0 props\n");
- ofd_memory_props(m, d, eoload);
+ printk("fix /memory props\n");
+ ofd_memory_props(m, d);
printk("fix /xen props\n");
ofd_xen_props(m, d, si);
--- /dev/null
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2006
+ *
+ * Authors: Jimi Xenidis <jimix@watson.ibm.com>
+ */
+
+#include <xen/config.h>
+#include <xen/lib.h>
+#include <xen/sched.h>
+#include <public/xen.h>
+#include "of-devtree.h"
+#include "oftree.h"
+
+static char memory[] = "memory";
+
+struct mem_reg {
+ u64 addr;
+ u64 sz;
+};
+
+/* Prune every node with device_type "memory" from the device tree
+ * image @m, so the memory layout can be rebuilt from scratch. */
+static void ofd_memory_clean(void *m)
+{
+    ofdn_t old;
+
+    /* Remove all old memory props */
+    do {
+        old = ofd_node_find_by_prop(m, OFD_ROOT, "device_type",
+                                    memory, sizeof(memory));
+        if (old <= 0)
+            break;
+
+        ofd_node_prune(m, old);
+    } while (1);
+}
+
+/* Create a device tree node "@ppath/@name@<start>" under parent @p,
+ * with device_type @dt and a single "reg" property covering the
+ * physical range [start, start + size).  Returns the new node handle.
+ * NOTE(review): the snprintf() return value is used unchecked as the
+ * path length -- a path longer than sizeof(path) would be truncated
+ * while l still reports the untruncated length; confirm the inputs
+ * are always short enough. */
+static ofdn_t ofd_memory_node_create(
+    void *m, ofdn_t p, const char *ppath, const char *name,
+    const char *dt, ulong start, ulong size)
+{
+    struct mem_reg reg;
+    char path[128];
+    ulong l;
+    ofdn_t n;
+    ulong nl = strlen(name) + 1;
+    ulong dtl = strlen(dt) + 1;
+
+    l = snprintf(path, sizeof (path), "%s/%s@%lx", ppath, name, start);
+    n = ofd_node_add(m, p, path, l + 1);
+    ofd_prop_add(m, n, "name", name, nl);
+    ofd_prop_add(m, n, "device_type", dt, dtl);
+
+    /* physical addresses usable without regard to OF */
+    reg.addr = start;
+    reg.sz = size;
+    ofd_prop_add(m, n, "reg", &reg, sizeof (reg));
+
+    return n;
+}
+
+/* Describe the domain's RMA as one memory node starting at address 0. */
+static void ofd_memory_rma_node(void *m, struct domain *d)
+{
+    ulong size = rma_size(d->arch.rma_order);
+    ofdn_t n;
+
+    n = ofd_memory_node_create(m, OFD_ROOT, "", memory, memory, 0, size);
+    BUG_ON(n <= 0);
+}
+
+/* Emit one memory node per extent allocated beyond the RMA.
+ * NOTE(review): the node address is derived from pe->pfn, which
+ * add_extent() fills from page_to_mfn() -- i.e. a machine frame.
+ * This presumes dom0's extent memory is reported identity-mapped;
+ * confirm this matches the pfn2mfn() translation. */
+static void ofd_memory_extent_nodes(void *m, struct domain *d)
+{
+    ulong start;
+    ulong size;
+    ofdn_t n;
+    struct page_extents *pe;
+
+    list_for_each_entry (pe, &d->arch.extent_list, pe_list) {
+
+        start = pe->pfn << PAGE_SHIFT;
+        size = 1UL << (pe->order + PAGE_SHIFT);
+
+        n = ofd_memory_node_create(m, OFD_ROOT, "", memory, memory,
+                                   start, size);
+
+        BUG_ON(n <= 0);
+    }
+}
+
+/* Rebuild all /memory device tree nodes for @d: drop the stale ones,
+ * then add one node for the RMA and one per additional extent. */
+void ofd_memory_props(void *m, struct domain *d)
+{
+    ofd_memory_clean(m);
+    ofd_memory_rma_node(m, d);
+    ofd_memory_extent_nodes(m,d);
+}
extern ulong oftree_len;
extern ulong oftree_end;
-extern int ofd_dom0_fixup(
- struct domain *d, ulong mem, start_info_t *si, ulong dst);
+extern int ofd_dom0_fixup(struct domain *d, ulong mem, start_info_t *si);
+extern void ofd_memory_props(void *m, struct domain *d);
extern int firmware_image_start[0];
extern int firmware_image_size[0];
{ .order = 38, .rmlr0 = 0, .rmlr12 = 0, }, /* 256 GB */
};
+static uint log_large_page_sizes[] = {
+ 4 + 20, /* (1 << 4) == 16M */
+};
+
static struct rma_settings *cpu_find_rma(unsigned int order)
{
int i;
+/* Fill @sizes with up to @max supported large-page orders, expressed
+ * in page order (log2 of the page size minus PAGE_SHIFT).  Returns
+ * the number of entries written. */
unsigned int cpu_large_page_orders(uint *sizes, uint max)
{
-    uint lp_log_size = 4 + 20; /* (1 << 4) == 16M */
-    if (max < 1)
-        return 0;
+    uint i = 0;
-    sizes[0] = lp_log_size - PAGE_SHIFT;
+    while (i < max && i < ARRAY_SIZE(log_large_page_sizes)) {
+        sizes[i] = log_large_page_sizes[i] - PAGE_SHIFT;
+        ++i;
+    }
-    return 1;
-}
+    return i;
+}
+
+/* Page order of the chunks handed out by allocate_extents(): the
+ * first (smallest) supported large-page size. */
+unsigned int cpu_extent_order(void)
+{
+    return log_large_page_sizes[0] - PAGE_SHIFT;
+}
void cpu_initialize(int cpuid)
{
/* this should be per processor, but for now */
#define CACHE_LINE_SIZE 128
+/* 256M - 64M of Xen space seems like a nice number */
+#define CONFIG_MIN_DOM0_PAGES (192 << (20 - PAGE_SHIFT))
#define CONFIG_SHADOW 1
#define CONFIG_GDB 1
#define CONFIG_SMP 1
struct page_info *rma_page;
uint rma_order;
+ /* list of extents beyond RMA */
+ struct list_head extent_list;
+
/* I/O-port access bitmap mask. */
u8 *iobmp_mask; /* Address of IO bitmap mask, or NULL. */
#define mfn_to_gmfn(_d, mfn) (mfn)
extern int allocate_rma(struct domain *d, unsigned int order_pages);
+extern uint allocate_extents(struct domain *d, uint nrpages, uint rma_nrpages);
+extern void free_extents(struct domain *d);
extern int steal_page(struct domain *d, struct page_info *page,
unsigned int memflags);
extern void show_registers(struct cpu_user_regs *);
extern void show_execution_state(struct cpu_user_regs *);
extern void show_backtrace(ulong sp, ulong lr, ulong pc);
+extern unsigned int cpu_extent_order(void);
extern unsigned int cpu_default_rma_order_pages(void);
extern uint cpu_large_page_orders(uint *sizes, uint max);
extern void cpu_initialize(int cpuid);